knitr::opts_chunk$set(echo = TRUE)
# Detect and install dependency packages
deps <- c("randomForest", "ggplot2", "pheatmap")
# Try to load each package quietly; if loading fails, install it and load again
for (pkg in deps) {
  loaded <- suppressWarnings(suppressMessages(
    require(pkg, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)
  ))
  if (!loaded) {
    install.packages(pkg)
    suppressWarnings(suppressMessages(
      library(pkg, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)
    ))
  }
}

随机森林分类通常有两个实验组。本示例原文由作者于2019年发表在Nature Biotechnology的封面文章中,以水稻籼稻(IND)和粳稻(TEJ)为两组(在医学中如:健康人Healthy和癌症Cancer)。这些样本分别收集于L地和H地两地。我们计划以L地样本建立模型,用5倍交叉验证筛选重要的特征,再用H地样本进行异地验证,证明模型的普适性。引文和数据出处见文末引文。

format2stamp步骤

# 1. Read the OTU abundance table (rows = features, columns = samples)
otutab = read.table("otutab.txt", header=T, row.names= 1, sep="\t", comment.char = "", stringsAsFactors = F)
# 2. Read the taxonomy annotation table (rows = features)
tax = read.table("taxonomy.txt", header=T, row.names= 1, sep="\t",comment.char = "", stringsAsFactors = F) 

# Manually subset the data by experimental design (optional)
metadata_all = read.table("metadata.txt", header=T, row.names=1, sep="\t") 
# Example: subset the HIND and HTEJ groups from the soiltype/subspecies column
# sub_metadata = subset(metadata, groupID %in% c("LIND","LTEJ","HIND","HTEJ"))
# sub_metadata = subset(metadata, State %in% c("H","I","D"))
# "DSmed-ISmed" "DEmed-IEmed" "DSmed-HSmed" "DEmed-HEmed" "DSpre-DSmed" "DGpre-DEmed" "DSmed-DSpost" "DEmed-DEpost" "DSpre-DSpost" "DGpre-DEpost" "HSpre-HSmed" "HSmed-HSpost" "HGpre-HEmed" "HEmed-HEpost"
# --- Grouping constants used to subset metadata and to label output files ---
# Only Site_A/Site_B and Sday_A/Sday_B are used downstream; the others are
# kept for reference / alternative contrasts.
SA = "DS"
SB = "HSpre"
SC = "ISmed"
Site_A = "S"
Site_B = "E"
Site_C = "G"
State_A = "H"
State_B = "D"
State_C = "I"
Sday_A = "pre"
Sday_B = "med"
Sday_C = "pre"
# When the design changes, Sday_C becomes "pre"

# Output file-name components. NOTE: the "feautres" spelling is kept on
# purpose so file names stay identical to previously generated results.
filetial1 = "rf.txt"
filetial2 = "rf.pdf"
sub_metadatanames = "sub_metadata"
rfcv_names = "rfcv"
importance_names = "importance"
top_f = "top_feautres"
prediction_name = "prediction_binary"
test_names = "test"
class_names = "family"
result_name = "result"

# Join name parts with "_" — replaces nine repetitive paste(..., sep="_") calls
join_name <- function(...) paste(..., sep = "_")

result_file_names <- join_name(result_name, Site_A, Site_B)
sub_metadata_filename <- join_name(sub_metadatanames, Site_A, Site_B, filetial1)
rfcv_filename_txt <- join_name(rfcv_names, Site_A, Site_B, filetial1)
rfcv_filename_pdf <- join_name(rfcv_names, Site_A, Site_B, filetial2)
importance_filename_txt <- join_name(importance_names, Site_A, Site_B, filetial1)
top_feautre_filename <- join_name(top_f, Site_A, Site_B, filetial2)
prediction_filename <- join_name(prediction_name, Site_A, Site_B, filetial1)
testfilename1 <- join_name(class_names, test_names, Site_A, filetial2)
testfilename2 <- join_name(class_names, test_names, Site_B, filetial2)


print(sub_metadata_filename)
## [1] "sub_metadata_S_E_rf.txt"
print(rfcv_filename_txt)
## [1] "rfcv_S_E_rf.txt"
print(rfcv_filename_pdf)
## [1] "rfcv_S_E_rf.pdf"
print(importance_filename_txt)
## [1] "importance_S_E_rf.txt"
print(top_feautre_filename)
## [1] "top_feautres_S_E_rf.pdf"
print(prediction_filename)
## [1] "prediction_binary_S_E_rf.txt"
print(testfilename1)
## [1] "family_test_S_rf.pdf"
print(testfilename2)
## [1] "family_test_E_rf.pdf"
print(result_file_names)
## [1] "result_S_E"
# Subset the metadata to the groups and columns of interest
# sub_metadata1 = subset(metadata_all, Day1 %in% c("post"))
# sub_metadata1
# sub_metadata2 = subset(sub_metadata1, State %in% c("H","D"))
# sub_metadata2

# Keep samples whose DayE is pre / med / post ...
sub_metadata1 = subset(metadata_all, DayE %in% c(Sday_A,Sday_B,"post"))
sub_metadata1
# ... then keep only the two study sites being compared
sub_metadata2 = subset(sub_metadata1, Site_1 %in% c(Site_A,Site_B))
sub_metadata2
# Cross-filter design vs input table: keep only samples present in both
idx = rownames(sub_metadata2) %in% colnames(otutab)
sub_metadata3 = sub_metadata2[idx,]
sub_metadata3
sub_otutab_tax = otutab[,rownames(sub_metadata3)]
sub_otutab_tax
write.table(sub_metadata3,file = sub_metadata_filename,quote = F,sep = '\t', row.names = T, col.names = T)


# OTU abundance filtering threshold; 0 disables filtering.
# NOTE(review): the original comment said the default is 0.1% (0.001),
# but 0.02 is used here — confirm this is intended.
thre = 0.02
# Output file-name prefix
prefix = "tax_pre_post"

# Generate summarized feature tables at each taxonomic rank
suppressWarnings(suppressMessages(library(amplicon)))
format2stamp(sub_otutab_tax, tax, thre, prefix)
## Warning: 程辑包'dplyr'是用R版本4.3.2 来建造的
# Writes tax_1-8 (7 rank tables + the filtered OTU table) to the current directory

样本随机分组(可选)

注:数据可以自由分组,如选择50%-80%建模,其余部分验证。为提高模型准确性,可以整合更多来源的数据,如北京、上海中一半作模型,另一半做验证,有可能提高验证时预测的准确率。随机取样代码如下:

# 假设从一百个样本中随机取70个,且无放回
# idx = sample(1:100, size = 70, replace = F)
# # 选择的样本标记为TRUE,未选择的为FALSE
# idx = 1:100 %in% idx
# 再用这个索引idx筛选对应的数据表,一部分作为训练集(train),另一部分作为测试集(test)
# train=metadata[idx,]
# test=metadata[!idx,]

#### 有自己设计好的训练集就不需要运行这一步。

# 自己设计
# idx1 = sample(1:214, size = 140, replace = F)
# # 选择的样本标记为TRUE,未选择的为FALSE
# idx = 1:214 %in% idx1
# idx

分类级选择(可选)

先使用format2stamp.Rmd基于OTU表(otutab.txt)、物种注释(taxonomy.txt)和元数据(metadata.txt)筛选样本、高丰度特征,并分类汇总各分类级(tax_1-8)。然后对各分类级进行准确性评估

# Read the experimental design and taxonomy files
# getwd()  # show current working directory
# setwd("C:/16S/result") 
# metadata_f = read.table("metadata_DSmed_HSmed.txt",header = T, row.names = 1)
# Rename the filtered metadata for this analysis
metadata_f = sub_metadata3
metadata_f
# Since R 4.0, read.table no longer converts strings to factors by default
# metadata$Group = as.factor(metadata$Group)
# Example: use site "L" as the training set
# metadata = subset(metadata, soiltype  %in% c("L"))
# Or define your own training and test sets
# train = metadata_f[idx,]
# test = metadata_f[!idx,]
# Build the training pool: all samples from sites S and E
train = subset(metadata_f, Site_1 %in% c("S","E"))
train
# NOTE(review): 386 is hard-coded and assumes nrow(train) == 386 — confirm;
# also no set.seed() precedes sample(), so this split is not reproducible
idx3 = sample(1:386, size = 200, replace = F)
# note: a size of 67 or 66 would be rather small
# Selected samples are marked TRUE, the rest FALSE
idx3 = 1:386 %in% idx3
#train2 = metadata_f[idx3,]
train2 = train[idx3,] # train2 is the subset of train used for training; the remainder becomes the test set
train2
train2$Site_1 = as.factor(train2$Site_1)
summary(train2)
##    Filename          Group_day          Group_day1         Group_dayE       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##   Group_dayS           Group            StateSite         StateSite_1       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##     State               Site           Site_1       Day        
##  Length:200         Length:200         E: 71   Min.   : 0.000  
##  Class :character   Class :character   S:129   1st Qu.: 0.000  
##  Mode  :character   Mode  :character           Median : 3.000  
##                                                Mean   : 3.395  
##                                                3rd Qu.: 7.000  
##                                                Max.   :10.000  
##     Day_num           Day1               DayE            Individual       
##  Min.   : 0.000   Length:200         Length:200         Length:200        
##  1st Qu.: 0.000   Class :character   Class :character   Class :character  
##  Median : 3.000   Mode  :character   Mode  :character   Mode  :character  
##  Mean   : 3.395                                                           
##  3rd Qu.: 7.000                                                           
##  Max.   :10.000
# 物种分类文件,由usearch10 -sintax_summary生成,详见扩增子分析流程系列。但存在1对多时无法对应分类级颜色(如Unassigned可能属于多个门),使用format2stamp.Rmd保留各级别名称
# library(randomForest)
# # "1Kingdom",界只有细菌、古菌类别太少;"7Species",扩增子中不太可信.8OTU0.02根据自己设的值来分组
# for(i in c("2Phylum","3Class","4Order","5Family","6Genus","8OTU0.02")){
#   # i="5Family"
#   set.seed(0)
#   table = read.table(paste0("tax_",i,".txt"),header = T, row.names = 1)
#   table = table[,rownames(metadata)]
#   rf = randomForest(t(table), train$group, importance=T, proximity=T, ntree = 1000)
#   print(i)
#   print(rf)
# }

# Pick a suitable taxonomic rank: fit a random forest at each rank
# and compare the out-of-bag (OOB) error rates printed below.
library(randomForest)
# run this yourself
# Ensure the response is a factor (idempotent, so hoisted out of the loop)
train2$Site_1 = as.factor(train2$Site_1)
for (rank_name in c("2Phylum","3Class","4Order","5Family","6Genus","8OTU0.02")) {
  # rank_name="5Family"
  set.seed(0)
  rank_table = read.table(paste0("tax_all_", rank_name, ".txt"), header = T, row.names = 1)
  # Keep only the training samples, in metadata order
  rank_table = rank_table[, rownames(train2)]
  rf = randomForest(t(rank_table), train2$Site_1, importance = T, proximity = T, ntree = 5000)
  print(rank_name)
  print(rf)
}
## [1] "2Phylum"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 3
## 
##         OOB estimate of  error rate: 25.5%
## Confusion matrix:
##    E   S class.error
## E 31  40  0.56338028
## S 11 118  0.08527132
## [1] "3Class"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 4
## 
##         OOB estimate of  error rate: 21.5%
## Confusion matrix:
##    E   S class.error
## E 41  30   0.4225352
## S 13 116   0.1007752
## [1] "4Order"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 5
## 
##         OOB estimate of  error rate: 21%
## Confusion matrix:
##    E   S class.error
## E 39  32  0.45070423
## S 10 119  0.07751938
## [1] "5Family"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 7
## 
##         OOB estimate of  error rate: 22%
## Confusion matrix:
##    E   S class.error
## E 36  35  0.49295775
## S  9 120  0.06976744
## [1] "6Genus"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 18.5%
## Confusion matrix:
##    E   S class.error
## E 40  31  0.43661972
## S  6 123  0.04651163
## [1] "8OTU0.02"
## 
## Call:
##  randomForest(x = t(table_f), y = train2$Site_1, ntree = 5000,      importance = T, proximity = T) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 23
## 
##         OOB estimate of  error rate: 12.5%
## Confusion matrix:
##    E   S class.error
## E 48  23  0.32394366
## S  2 127  0.01550388
# OOB estimate of error rate: 越小越好。

# 本次运行中OTU水平错误率最低(12.5%),属(Genus)水平次之(18.5%)




# 注意:下文实际读取的是属(6Genus)水平的特征表;可以将各分类级的筛选结果做成折线图作为附图

最佳水平数据读取和统计

读取实验设计、Feature表,并进行数据筛选和交叉筛选

# Read the experimental design and taxonomy files
# metadata = read.table("metadata_DSmed_HSmed.txt",header = T, row.names = 1)
# metadata = sub_metadata3
# Since R 4.0, read.table no longer converts strings to factors by default
# metadata$group = as.factor(metadata$group)



# NOTE(review): this repeats the split from the rank-selection section, and
# sample() is again called without set.seed(), so this draw (and the summary
# below) differ from the earlier run — confirm this re-draw is intended
train = subset(metadata_f, Site_1 %in% c("S","E"))
train
# NOTE(review): hard-coded 386 assumes nrow(train) == 386 — confirm
idx3 = sample(1:386, size = 200, replace = F)
# note: a size of 67 or 66 would be rather small
# Selected samples are marked TRUE, the rest FALSE
idx3 = 1:386 %in% idx3
#train2 = metadata_f[idx3,]
train2 = train[idx3,] # train2 is the subset of train used for training; the remainder is the test set
train2
train2$Site_1 = as.factor(train2$Site_1)
summary(train2)
##    Filename          Group_day          Group_day1         Group_dayE       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##   Group_dayS           Group            StateSite         StateSite_1       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##     State               Site           Site_1       Day           Day_num     
##  Length:200         Length:200         E: 83   Min.   : 0.00   Min.   : 0.00  
##  Class :character   Class :character   S:117   1st Qu.: 0.00   1st Qu.: 0.00  
##  Mode  :character   Mode  :character           Median : 3.00   Median : 3.00  
##                                                Mean   : 3.44   Mean   : 3.44  
##                                                3rd Qu.: 7.00   3rd Qu.: 7.00  
##                                                Max.   :10.00   Max.   :10.00  
##      Day1               DayE            Individual       
##  Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character  
##                                                          
##                                                          
## 
# Read the feature table at the rank chosen above.
# NOTE(review): the surrounding text mentions the family level, but the
# genus-level (6Genus) table is read here — confirm which rank is intended
table =read.table("tax_all_6Genus.txt",header = T, row.names = 1)
# Example: use site "L" samples as the training set
# metadata_train = subset(metadata, soiltype %in% c("L"))
# Rename train2 so later code is easier to tell apart
metadata_train1 = train2
summary(metadata_train1)
##    Filename          Group_day          Group_day1         Group_dayE       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##   Group_dayS           Group            StateSite         StateSite_1       
##  Length:200         Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##     State               Site           Site_1       Day           Day_num     
##  Length:200         Length:200         E: 83   Min.   : 0.00   Min.   : 0.00  
##  Class :character   Class :character   S:117   1st Qu.: 0.00   1st Qu.: 0.00  
##  Mode  :character   Mode  :character           Median : 3.00   Median : 3.00  
##                                                Mean   : 3.44   Mean   : 3.44  
##                                                3rd Qu.: 7.00   3rd Qu.: 7.00  
##                                                Max.   :10.00   Max.   :10.00  
##      Day1               DayE            Individual       
##  Length:200         Length:200         Length:200        
##  Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character  
##                                                          
##                                                          
## 
# Cross-filter: keep training samples that exist in the feature table,
# then subset the feature table to those samples (in metadata order)
idx = rownames(metadata_train1) %in% colnames(table)
metadata_train1 = metadata_train1[idx,]
otu_sub1 = table[, rownames(metadata_train1)] 
otu_sub1
dim(otu_sub1)
## [1]  92 200

选择最佳随机数(可选)

# Try seeds 0-9 and compare the OOB error rate of each fitted forest
library(randomForest)
for (seed_value in 0:9) {
  set.seed(seed_value)
  rf2 = randomForest(t(otu_sub1), metadata_train1$Site_1,
                     importance = TRUE, proximity = TRUE, ntree = 5000)
  print(seed_value)
  print(rf2)
}
## [1] 0
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17%
## Confusion matrix:
##    E   S class.error
## E 61  22   0.2650602
## S 12 105   0.1025641
## [1] 1
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 18%
## Confusion matrix:
##    E   S class.error
## E 62  21   0.2530120
## S 15 102   0.1282051
## [1] 2
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17.5%
## Confusion matrix:
##    E   S class.error
## E 61  22   0.2650602
## S 13 104   0.1111111
## [1] 3
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 18.5%
## Confusion matrix:
##    E   S class.error
## E 61  22   0.2650602
## S 15 102   0.1282051
## [1] 4
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17.5%
## Confusion matrix:
##    E   S class.error
## E 60  23   0.2771084
## S 12 105   0.1025641
## [1] 5
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 18.5%
## Confusion matrix:
##    E   S class.error
## E 60  23   0.2771084
## S 14 103   0.1196581
## [1] 6
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17.5%
## Confusion matrix:
##    E   S class.error
## E 60  23   0.2771084
## S 12 105   0.1025641
## [1] 7
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17%
## Confusion matrix:
##    E   S class.error
## E 62  21   0.2530120
## S 13 104   0.1111111
## [1] 8
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17%
## Confusion matrix:
##    E   S class.error
## E 61  22   0.2650602
## S 12 105   0.1025641
## [1] 9
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 16%
## Confusion matrix:
##    E   S class.error
## E 61  22  0.26506024
## S 10 107  0.08547009

随机森林分类

在确定的分类层级和最佳随机数下建模

library(randomForest)
# Fit the final model at the chosen taxonomic rank with the chosen seed
set.seed(0)
rf3 = randomForest(t(otu_sub1), metadata_train1$Site_1, importance=TRUE, proximity=TRUE, ntree = 5000)
print(rf3)
## 
## Call:
##  randomForest(x = t(otu_sub1), y = metadata_train1$Site_1, ntree = 5000,      importance = TRUE, proximity = TRUE) 
##                Type of random forest: classification
##                      Number of trees: 5000
## No. of variables tried at each split: 9
## 
##         OOB estimate of  error rate: 17%
## Confusion matrix:
##    E   S class.error
## E 61  22   0.2650602
## S 12 105   0.1025641

交叉验证选择重要特征

set.seed(1) # fix the random seed so the result is reproducible (required)
# rfcv = Random Forest Cross-Validation: error rate vs number of features
result = rfcv(t(otu_sub1), metadata_train1$Site_1, cv.fold=5)
# Inspect the error table. NOTE(review): the original comment claimed the
# minimum was at 31 features; the recorded run below shows 46 — re-check
result$error.cv
##    92    46    23    12     6     3     1 
## 0.210 0.200 0.215 0.205 0.250 0.300 0.425
# Plot the cross-validation curve
with(result, plot(n.var, error.cv, log="x", type="o", lwd=2))

# Run the cross-validation several times
## Data frame collecting the error curves of all runs
error.cv0 = data.frame(num = result$n.var, error.1 =  result$error.cv)
## Repeat with seeds 1-10 (10 more runs; the original comment said 5)
for (i in 1:(1+9)){
  print(i)
  set.seed(i)
  result= rfcv(t(otu_sub1), metadata_train1$Site_1, cv.fold=5) #  scale = "log", step = 0.9
  error.cv0 = cbind(error.cv0, result$error.cv)
}
## [1] 1
## [1] 2
## [1] 3
## [1] 4
## [1] 5
## [1] 6
## [1] 7
## [1] 8
## [1] 9
## [1] 10
error.cv0 

绘制交叉验证曲线

# Extract x-axis values (number of features tested by rfcv)
n.var = error.cv0$num
# Extract the first five cross-validation error series and label them
error.cv = error.cv0[,2:6]
colnames(error.cv) = paste('err',1:5,sep='.')
# Row-wise mean across the five replicate curves
err.mean = apply(error.cv,1,mean)
# Assemble the plotting table: feature count + per-replicate errors + mean
allerr = data.frame(num=n.var,err.mean=err.mean,error.cv)
# Number of features to keep, chosen by eye from the curve
optimal = 15

# Figure 1: cross-validation error vs number of features (Top-feature choice)
# 5 grey replicate curves + black mean curve + dashed optimum line + log x-axis
write.table(allerr, file = rfcv_filename_txt, sep = "\t", quote = F, row.names = T, col.names = T)

# Build the plot from the data frame itself instead of reaching into
# allerr$ inside every aes() (a ggplot2 anti-pattern that breaks faceting
# and decouples the mapping from the data argument)
p = ggplot(allerr, aes(x = num)) +
  geom_line(aes(y = err.1), colour = 'grey') + # five replicate curves in grey
  geom_line(aes(y = err.2), colour = 'grey') + 
  geom_line(aes(y = err.3), colour = 'grey') + 
  geom_line(aes(y = err.4), colour = 'grey') + 
  geom_line(aes(y = err.5), colour = 'grey') + 
  geom_line(aes(y = err.mean), colour = 'black') + # mean curve in black
  geom_vline(xintercept = optimal, colour='black', lwd=0.36, linetype="dashed") + # optimum line
  coord_trans(x = "log2") + # log-transform the x axis
  scale_x_continuous(breaks = c(1, 2, 5, 10, 20, 30, 50, 100, 200)) + # , max(allerr$num)
  # NOTE(review): the axis label says "families" but the features here come
  # from the genus-level table — confirm the wording
  labs(title=paste('Training set (n = ', dim(t(otu_sub1))[1],')', sep = ''), 
       x='Number of families ', y='Cross-validation error rate') + 
  annotate("text", x = optimal, y = max(allerr$err.mean), label=paste("optimal = ", optimal, sep="")) + theme_bw()
p  

ggsave(p, file = rfcv_filename_pdf, width = 180, height = 120, unit = 'mm')

特征重要性可视化

## Preview and save feature importance (sorted by mean decrease in accuracy)
imp= as.data.frame(rf3$importance)
imp = imp[order(imp$MeanDecreaseAccuracy, decreasing = T),]
# bbb = head(imp,n=optimal)
write.table(imp,file = importance_filename_txt,quote = F,sep = '\t', row.names = T, col.names = T)
# Quick built-in visualization (rather ugly)
# varImpPlot(rf, main = "Feature importance",n.var = optimal, bg = par("bg"), color = par("fg"), gcolor = par("fg"), lcolor = "gray" )

# Figure 2. Feature importance: bar plot coloured by phylum

# Re-read the saved importance table.
# NOTE(review): this writes then immediately re-reads the same file —
# presumably so the chunk can also run stand-alone on a saved table; confirm
imp2 = read.table(importance_filename_txt, header=T, row.names= 1, sep="\t") 
# Keep the top features; `optimal` (set in the rfcv chunk) controls how many
imp2 = head(imp2, n = optimal)
imp2 = imp2[order(imp2$MeanDecreaseAccuracy, decreasing = F),]
# Shorten the full lineage: drop the kingdom prefix
imp2$Family = gsub("Bacteria\\|","",rownames(imp2))
# Extract the phylum (delete everything after the first "|") for colouring
imp2$Phylum = gsub("\\|.*","",imp2$Family)

# Fix the display order of the bars
imp2$Family = factor(imp2$Family, levels = imp2$Family)

# Figure 2. Draw the importance bar plot coloured by phylum
p = ggplot(imp2, aes(x = Family, y = MeanDecreaseAccuracy, fill = Phylum)) +   
  geom_bar(stat = "identity") + 
  coord_flip() + theme_bw()
p

ggsave(top_feautre_filename, p, width=200*2.5, height=59*2, unit='mm')
# Names are not guaranteed unique; manual editing may be needed

#  Shortened names (keep only the last field; unusable with duplicates; optional)
# imp$Family = gsub(".*\\|","",imp$Family)
# imp$Family = factor(imp$Family, levels = imp$Family)
# p = ggplot(imp, aes(x = Family, y = MeanDecreaseAccuracy, fill = Phylum)) +   
#   geom_bar(stat = "identity") + 
#   coord_flip() + theme_bw()
# p
# ggsave(paste("top_feautre",".pdf", sep=""), p, width=89*1.5, height=59*1.5, unit='mm')

测试集独立验证

如果第一地点数据量足够大,可以取出1/2到1/3进行同一地点的独立验证。方法相同。

筛选测试集样品

# metadata_test = subset(metadata, soiltype %in% c("H")) 
# summary(metadata_test)
# idx = rownames(metadata_test) %in% colnames(table)
# metadata_test = metadata_test[idx,]
# otu_sub = table[,rownames(metadata_test)]

# metadata_test = subset(metadata_f, Site_1 %in% c("E")) 
# summary(metadata_test)
# idx = rownames(metadata_test) %in% colnames(table)
# metadata_test = metadata_test[idx,]
# otu_sub2 = table[,rownames(metadata_test)]

# idx3 = sample(1:123, size = 67, replace = F)
# note: a size of 67 or 66 would be rather small
# Selected samples are marked TRUE, the rest FALSE
# idx3 = 1:123 %in% idx3
# idx2 = sample(1:123, size = 60, replace = F)
# # Selected samples are marked TRUE, the rest FALSE
# idx2 = 1:123 %in% idx2

# Test set = the training-pool samples NOT drawn into train2 above
test = train[!idx3,]
test
#test = metadata_f[!idx3,]
metadata_test = test
summary(metadata_test)
##    Filename          Group_day          Group_day1         Group_dayE       
##  Length:186         Length:186         Length:186         Length:186        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##   Group_dayS           Group            StateSite         StateSite_1       
##  Length:186         Length:186         Length:186         Length:186        
##  Class :character   Class :character   Class :character   Class :character  
##  Mode  :character   Mode  :character   Mode  :character   Mode  :character  
##                                                                             
##                                                                             
##                                                                             
##     State               Site              Site_1               Day        
##  Length:186         Length:186         Length:186         Min.   : 0.000  
##  Class :character   Class :character   Class :character   1st Qu.: 0.000  
##  Mode  :character   Mode  :character   Mode  :character   Median : 3.000  
##                                                           Mean   : 3.183  
##                                                           3rd Qu.: 7.000  
##                                                           Max.   :10.000  
##     Day_num           Day1               DayE            Individual       
##  Min.   : 0.000   Length:186         Length:186         Length:186        
##  1st Qu.: 0.000   Class :character   Class :character   Class :character  
##  Median : 3.000   Mode  :character   Mode  :character   Mode  :character  
##  Mean   : 3.183                                                           
##  3rd Qu.: 7.000                                                           
##  Max.   :10.000
# Cross-filter: keep test samples present in the feature table
idx4 = rownames(metadata_test) %in% colnames(table)
metadata_test = metadata_test[idx4,]
otu_sub2 = table[,rownames(metadata_test)]



# Transpose (samples x features) and attach the group label for comparison
otutab_t2 = as.data.frame(t(otu_sub2))
otutab_t2$Site_1 = metadata_test[rownames(otutab_t2),]$Site_1

基于训练集随机森林模型验证

# NOTE(review): predict() on a fitted forest is deterministic, so this seed
# presumably has no effect — confirm before relying on it
set.seed(315)
otutab.pred = predict(rf3, t(otu_sub2) )  
# Confusion table: observed group vs model prediction on the test set
pre_tab = table(observed=otutab_t2[,"Site_1"],
                predicted=otutab.pred) 
pre_tab
##         predicted
## observed   E   S
##        E  49  23
##        S   7 107
# for (i in 0:30){
#    set.seed(i)
#    otutab.pred = predict(rf3, t(otu_sub2) )  
#    pre_tab = table(observed=otutab_t2[,"State"],
#                 predicted=otutab.pred) 
#    print(pre_tab)
# }

可视化验证结果

# Collect each sample's observed group and predicted class.
# NOTE(review): the name `predict` shadows stats::predict in this session
predict = data.frame(group = otutab_t2[,"Site_1"], predicted=otutab.pred)

# Save the prediction table; the first write puts a "SampleID" header cell in
# front of the appended table (appending with col.names triggers a harmless warning)
write.table("SampleID\t", file=prediction_filename,append = F, quote = F, eol = "", row.names = F, col.names = F)
write.table(predict, file = prediction_filename,append = T, quote = F, row.names = T, col.names = T, sep = "\t")
## Warning in write.table(predict, file = prediction_filename, append = T, :
## 给文件加列名
# Convert to numbers for the heatmap:
# correct prediction = 1, wrong = 0
predict$result = ifelse(predict$group == predict$predicted, 1, 0)
# Predicted class code: S = 1, E = 2 (the original IND/TEJ labels came from
# the rice example this script was adapted from)
predict$predict = ifelse(predict$predicted == "S", 1, 2)
# Set sample number in each row
column = 10

AA1 = predict[predict$group=="S",]$predict
length(AA1)
## [1] 114
# Number of heatmap rows needed to hold all S samples, 10 per row
row = round(length(AA1)/column + 0.5)
row
## [1] 12
# Pad the tail of the last row with NA
i = column * row - length(AA1)
AA1 = c(AA1, rep(NA, i))
matrix1 = matrix(AA1, nrow = row, ncol = column, byrow = T)
pheatmap(matrix1, cluster_rows = F, cluster_cols = F, cellwidth = 15, cellheight = 12)

#pheatmap(matrix1, cluster_rows = F, cluster_cols = F, cellwidth = 15, cellheight = 12,filename = "family_test_DD.pdf")
pheatmap(matrix1, cluster_rows = F, cluster_cols = F, cellwidth = 18, cellheight = 18,filename = testfilename1)
# Draw the prediction result for group E the same way
BB1 = predict[predict$group=="E",]$predict
length(BB1)
## [1] 72
row = round(length(BB1)/column + 0.5)
i = column * row - length(BB1)
BB1 = c(BB1, rep(NA, i))
matrix2 = matrix(BB1, nrow = row, ncol = column, byrow = T)
pheatmap(matrix2, cluster_rows = F, cluster_cols = F, cellwidth = 15, cellheight = 12)
# Save the figure
#pheatmap(matrix2, cluster_rows = F, cluster_cols = F, cellwidth = 18, cellheight = 18, filename = "family_test_HH.pdf")
pheatmap(matrix2, cluster_rows = F, cluster_cols = F, cellwidth = 18, cellheight = 18, filename = testfilename2)



## Move the generated output files into a dedicated results folder
tarDir <- result_file_names
# The folder may already exist from a previous run; suppress the warning
# (the original dir.create(tarDir) warned on every re-run)
dir.create(tarDir, showWarnings = FALSE)
# Select files by suffix. NOTE: `pattern` is a regular expression, not a
# glob — the original "*_rf.txt" only worked by accident (leading '*' is
# tolerated, '.' matches any char). Anchor the literal suffix instead.
tobeCopy1 <- list.files(".", pattern="_rf\\.txt$")
tobeCopy2 <- list.files(".", pattern="_rf\\.pdf$")
# Copy the selected files (transcript below is from a previous run)
sapply(tobeCopy1,function(x){file.copy(paste(".",x,sep="/"),tarDir,overwrite = TRUE)})
##        importance_S_E_rf.txt prediction_binary_S_E_rf.txt 
##                         TRUE                         TRUE 
##              rfcv_S_E_rf.txt      sub_metadata_S_E_rf.txt 
##                         TRUE                         TRUE
sapply(tobeCopy2,function(x){file.copy(paste(".",x,sep="/"),tarDir,overwrite = TRUE)})
##    family_test_E_rf.pdf    family_test_S_rf.pdf         rfcv_S_E_rf.pdf 
##                    TRUE                    TRUE                    TRUE 
## top_feautres_S_E_rf.pdf 
##                    TRUE
# Delete the originals that were just copied, using the same anchored patterns
allfile = dir()
txtfile1 <- grep("_rf\\.txt$",allfile)
txtfile2 <- grep("_rf\\.pdf$",allfile)
file.remove(allfile[txtfile1])
## [1] TRUE TRUE TRUE TRUE
file.remove(allfile[txtfile2])
## [1] TRUE TRUE TRUE TRUE

使用此脚本,请引用下文:

If used this script, please cited:

Yong-Xin Liu, Lei Chen, Tengfei Ma, Xiaofang Li, Maosheng Zheng, Xin Zhou, Liang Chen, Xubo Qian, Jiao Xi, Hongye Lu, Huiluo Cao, Xiaoya Ma, Bian Bian, Pengfan Zhang, Jiqiu Wu, Ren-You Gan, Baolei Jia, Linyang Sun, Zhicheng Ju, Yunyun Gao, Tao Wen, Tong Chen. 2023. EasyAmplicon: An easy-to-use, open-source, reproducible, and community-based pipeline for amplicon data analysis in microbiome research. iMeta 2: e83. https://doi.org/10.1002/imt2.83

Copyright 2016-2023 Yong-Xin Liu , Tao Wen , Tong Chen